Generated Image¶

In [54]:
Ashtami Vijayan pilla :500224260
Melvin Raju :500223500

Importing Libraries¶

In [ ]:
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.keras import layers, models, backend as K
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import shutil
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from scipy.linalg import sqrtm
from numpy import cov, trace, iscomplexobj
from numpy.random import random
from scipy.stats import entropy
from IPython.display import Image
In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive

Mounting the Google Drive¶

In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).

Reading the images from the drive¶

In [ ]:
base_dir = '/content/drive/MyDrive/FaceExpressions/dataset'
categories = ['Angry', 'Surprise', 'Sad', 'Ahegao', 'Happy', 'Neutral']

for category in categories:
    print(f'{category}:')
    path = os.path.join(base_dir, category)
    print(os.listdir(path)[:5])  # List first 5 files in each category
Angry:
['295f94b25bd4406ecebe9791a9531b38c6e5ce69fd37ead8f4f9ffdb~angry.png', '4b33a2f540fd37977aca8ede305989e23e76738f89c1c16ca9a2a620~angry.jpg', '4fd88a0a743c84a624347e2c761bcd7015a929d31b40f9a36e7ee259~angry.png', 'cropped_emotions.231374~angry.png', 'cropped_emotions.158081~angry.png']
Surprise:
['cropped_emotions.100270~12fffff.png', 'cropped_emotions.100276~12fffff.png', 'cropped_emotions.100279~12fffff.png', 'cropped_emotions.100207~12fffff.png', 'cropped_emotions.100218~12fffff.png']
Sad:
['cropped_emotions.163492.png', 'cropped_emotions.164034.png', 'cropped_emotions.153406.png', 'cropped_emotions.164233.png', 'cropped_emotions.164347.png']
Ahegao:
['cropped_emotions.189154~ahegao.png', 'cropped_emotions.190427~ahegao.png', 'lol140~ahegao.png', 'lol795~ahegao.png']
Happy:
[]
Neutral:
['0b2f9d06b850023dae03f02fc6c56a6b4b3684b3b63331838edc91ddf.jpg', '0b195e0b51a44f69d06de0dbfb1893df73769c6b065c2574ff3693cff.jpg', '0b3161a4ccc3aa541df0a73f00d106ff0912c0bb9e4a0642e9bac59af.jpg', '0b0b6d01330839782219ab3d938414cf5efeec8cd79aa2cc3d9a9e95f.jpg', '0b00131d449254c09c9b2c3b295ec677940f889a3aac6a90e6982a5bf.jpg']

Displaying images form each folder¶

In [ ]:
# Function to display images
def display_images(category, image_files):
    plt.figure(figsize=(20, 10))
    for i, img_file in enumerate(image_files):
        img_path = os.path.join(base_dir, category, img_file)
        img = mpimg.imread(img_path)
        plt.subplot(1, 5, i + 1)
        plt.imshow(img)
        plt.axis('off')
        plt.title(category)
    plt.show()

# Iterate through each category and display the first 5 images
for category in categories:
    path = os.path.join(base_dir, category)
    image_files = os.listdir(path)[:5]
    print(f'Displaying first 5 images from category: {category}')
    display_images(category, image_files)
Displaying first 5 images from category: Angry
Displaying first 5 images from category: Surprise
Displaying first 5 images from category: Sad
Displaying first 5 images from category: Ahegao
Displaying first 5 images from category: Happy
<Figure size 2000x1000 with 0 Axes>
Displaying first 5 images from category: Neutral

Load Images using "ImageDataGenerator" from keras.preprocessing.image¶

In [ ]:
datagen = ImageDataGenerator(rescale=1./255)

image_size = (128, 128)  # Resize images to 128x128

train_generator = datagen.flow_from_directory(
    base_dir,
    target_size=image_size,
    batch_size=32,
    class_mode='input',  # Input because we are using these images for unsupervised learning
    shuffle=True
)
Found 6793 images belonging to 6 classes.

Creating a subset folder¶

In [ ]:
# Create a new directory for the subset of images
subset_dir = '/content/subset_dataset'
os.makedirs(subset_dir, exist_ok=True)

Loading sample images¶

In [ ]:
# Copy 100 images from each category to the new directory
for category in categories:
    category_dir = os.path.join(base_dir, category)
    subset_category_dir = os.path.join(subset_dir, category)
    os.makedirs(subset_category_dir, exist_ok=True)

    image_files = os.listdir(category_dir)[:200]
    for img_file in image_files:
        shutil.copy(os.path.join(category_dir, img_file), subset_category_dir)
In [ ]:
# Verify the new dataset
for category in categories:
    subset_category_dir = os.path.join(subset_dir, category)
    print(f'{category}: {len(os.listdir(subset_category_dir))} images')
Angry: 24 images
Surprise: 200 images
Sad: 200 images
Ahegao: 4 images
Happy: 0 images
Neutral: 200 images
In [ ]:
# Define image size and latent dimension
image_size = (128, 128)
latent_dim = 2

Loading the sample images¶

In [ ]:
# Data Generator
datagen = ImageDataGenerator(rescale=1./255)
train_generator = datagen.flow_from_directory(
    subset_dir,
    target_size=image_size,
    batch_size=32,
    class_mode='input',
    shuffle=True
)
Found 628 images belonging to 6 classes.

Building the Variational Autoencoder¶

Encoder¶

In [ ]:
# Encoder
encoder_inputs = tf.keras.Input(shape=(image_size[0], image_size[1], 3))
x = layers.Conv2D(32, 3, activation='relu', padding='same')(encoder_inputs)
x = layers.MaxPooling2D()(x)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
x = layers.MaxPooling2D()(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='relu')(x)
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim)(x)
In [ ]:
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

z = layers.Lambda(sampling)([z_mean, z_log_var])
In [ ]:
encoder = tf.keras.Model(encoder_inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
Model: "encoder"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_1 (InputLayer)        [(None, 128, 128, 3)]        0         []                            
                                                                                                  
 conv2d (Conv2D)             (None, 128, 128, 32)         896       ['input_1[0][0]']             
                                                                                                  
 max_pooling2d (MaxPooling2  (None, 64, 64, 32)           0         ['conv2d[0][0]']              
 D)                                                                                               
                                                                                                  
 conv2d_1 (Conv2D)           (None, 64, 64, 64)           18496     ['max_pooling2d[0][0]']       
                                                                                                  
 max_pooling2d_1 (MaxPoolin  (None, 32, 32, 64)           0         ['conv2d_1[0][0]']            
 g2D)                                                                                             
                                                                                                  
 flatten (Flatten)           (None, 65536)                0         ['max_pooling2d_1[0][0]']     
                                                                                                  
 dense (Dense)               (None, 128)                  8388736   ['flatten[0][0]']             
                                                                                                  
 dense_1 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 dense_2 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 lambda (Lambda)             (None, 2)                    0         ['dense_1[0][0]',             
                                                                     'dense_2[0][0]']             
                                                                                                  
==================================================================================================
Total params: 8408644 (32.08 MB)
Trainable params: 8408644 (32.08 MB)
Non-trainable params: 0 (0.00 Byte)
__________________________________________________________________________________________________

Decoder¶

In [ ]:
# Decoder
latent_inputs = tf.keras.Input(shape=(latent_dim,))
x = layers.Dense(32 * 32 * 64, activation='relu')(latent_inputs)
x = layers.Reshape((32, 32, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation='relu', padding='same')(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2DTranspose(32, 3, activation='relu', padding='same')(x)
x = layers.UpSampling2D()(x)
decoder_outputs = layers.Conv2DTranspose(3, 3, activation='sigmoid', padding='same')(x)
In [ ]:
decoder = tf.keras.Model(latent_inputs, decoder_outputs, name='decoder')
decoder.summary()
Model: "decoder"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_2 (InputLayer)        [(None, 2)]               0         
                                                                 
 dense_3 (Dense)             (None, 65536)             196608    
                                                                 
 reshape (Reshape)           (None, 32, 32, 64)        0         
                                                                 
 conv2d_transpose (Conv2DTr  (None, 32, 32, 64)        36928     
 anspose)                                                        
                                                                 
 up_sampling2d (UpSampling2  (None, 64, 64, 64)        0         
 D)                                                              
                                                                 
 conv2d_transpose_1 (Conv2D  (None, 64, 64, 32)        18464     
 Transpose)                                                      
                                                                 
 up_sampling2d_1 (UpSamplin  (None, 128, 128, 32)      0         
 g2D)                                                            
                                                                 
 conv2d_transpose_2 (Conv2D  (None, 128, 128, 3)       867       
 Transpose)                                                      
                                                                 
=================================================================
Total params: 252867 (987.76 KB)
Trainable params: 252867 (987.76 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
In [ ]:
# VAE model
outputs = decoder(encoder(encoder_inputs)[2])
vae = tf.keras.Model(encoder_inputs, outputs, name='vae')
In [ ]:
# VAE loss
reconstruction_loss = tf.keras.losses.binary_crossentropy(K.flatten(encoder_inputs), K.flatten(outputs))
reconstruction_loss *= image_size[0] * image_size[1]
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
vae.summary()
Model: "vae"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_1 (InputLayer)        [(None, 128, 128, 3)]        0         []                            
                                                                                                  
 encoder (Functional)        [(None, 2),                  8408644   ['input_1[0][0]']             
                              (None, 2),                                                          
                              (None, 2)]                                                          
                                                                                                  
 decoder (Functional)        (None, 128, 128, 3)          252867    ['encoder[0][2]']             
                                                                                                  
 conv2d (Conv2D)             (None, 128, 128, 32)         896       ['input_1[0][0]']             
                                                                                                  
 max_pooling2d (MaxPooling2  (None, 64, 64, 32)           0         ['conv2d[0][0]']              
 D)                                                                                               
                                                                                                  
 conv2d_1 (Conv2D)           (None, 64, 64, 64)           18496     ['max_pooling2d[0][0]']       
                                                                                                  
 max_pooling2d_1 (MaxPoolin  (None, 32, 32, 64)           0         ['conv2d_1[0][0]']            
 g2D)                                                                                             
                                                                                                  
 flatten (Flatten)           (None, 65536)                0         ['max_pooling2d_1[0][0]']     
                                                                                                  
 dense (Dense)               (None, 128)                  8388736   ['flatten[0][0]']             
                                                                                                  
 dense_2 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 dense_1 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 tf.reshape (TFOpLambda)     (None,)                      0         ['input_1[0][0]']             
                                                                                                  
 tf.reshape_1 (TFOpLambda)   (None,)                      0         ['decoder[0][0]']             
                                                                                                  
 tf.__operators__.add (TFOp  (None, 2)                    0         ['dense_2[0][0]']             
 Lambda)                                                                                          
                                                                                                  
 tf.math.square (TFOpLambda  (None, 2)                    0         ['dense_1[0][0]']             
 )                                                                                                
                                                                                                  
 tf.cast (TFOpLambda)        (None,)                      0         ['tf.reshape[0][0]']          
                                                                                                  
 tf.convert_to_tensor (TFOp  (None,)                      0         ['tf.reshape_1[0][0]']        
 Lambda)                                                                                          
                                                                                                  
 tf.math.subtract (TFOpLamb  (None, 2)                    0         ['tf.__operators__.add[0][0]',
 da)                                                                 'tf.math.square[0][0]']      
                                                                                                  
 tf.math.exp (TFOpLambda)    (None, 2)                    0         ['dense_2[0][0]']             
                                                                                                  
 tf.keras.backend.binary_cr  (None,)                      0         ['tf.cast[0][0]',             
 ossentropy (TFOpLambda)                                             'tf.convert_to_tensor[0][0]']
                                                                                                  
 tf.math.subtract_1 (TFOpLa  (None, 2)                    0         ['tf.math.subtract[0][0]',    
 mbda)                                                               'tf.math.exp[0][0]']         
                                                                                                  
 tf.math.reduce_mean (TFOpL  ()                           0         ['tf.keras.backend.binary_cros
 ambda)                                                             sentropy[0][0]']              
                                                                                                  
 tf.math.reduce_sum (TFOpLa  (None,)                      0         ['tf.math.subtract_1[0][0]']  
 mbda)                                                                                            
                                                                                                  
 tf.math.multiply (TFOpLamb  ()                           0         ['tf.math.reduce_mean[0][0]'] 
 da)                                                                                              
                                                                                                  
 tf.math.multiply_1 (TFOpLa  (None,)                      0         ['tf.math.reduce_sum[0][0]']  
 mbda)                                                                                            
                                                                                                  
 tf.__operators__.add_1 (TF  (None,)                      0         ['tf.math.multiply[0][0]',    
 OpLambda)                                                           'tf.math.multiply_1[0][0]']  
                                                                                                  
 tf.math.reduce_mean_1 (TFO  ()                           0         ['tf.__operators__.add_1[0][0]
 pLambda)                                                           ']                            
                                                                                                  
 add_loss (AddLoss)          ()                           0         ['tf.math.reduce_mean_1[0][0]'
                                                                    ]                             
                                                                                                  
==================================================================================================
Total params: 8661511 (33.04 MB)
Trainable params: 8661511 (33.04 MB)
Non-trainable params: 0 (0.00 Byte)
__________________________________________________________________________________________________

Training¶

In [ ]:
# Training
epochs = 10
vae.fit(train_generator, epochs=epochs)
Epoch 1/10
20/20 [==============================] - 77s 3s/step - loss: 11084.1191
Epoch 2/10
20/20 [==============================] - 65s 3s/step - loss: 10786.1504
Epoch 3/10
20/20 [==============================] - 64s 3s/step - loss: 10473.9512
Epoch 4/10
20/20 [==============================] - 62s 3s/step - loss: 10304.8027
Epoch 5/10
20/20 [==============================] - 65s 3s/step - loss: 10274.2529
Epoch 6/10
20/20 [==============================] - 66s 3s/step - loss: 10219.9023
Epoch 7/10
20/20 [==============================] - 65s 3s/step - loss: 10183.4873
Epoch 8/10
20/20 [==============================] - 65s 3s/step - loss: 10173.6689
Epoch 9/10
20/20 [==============================] - 72s 4s/step - loss: 10175.3145
Epoch 10/10
20/20 [==============================] - 65s 3s/step - loss: 10160.9453
Out[ ]:
<keras.src.callbacks.History at 0x7869c07954e0>

Model Evaluation¶

In [ ]:
# Load pretrained InceptionV3 model
inception_model = InceptionV3(include_top=False, pooling='avg', input_shape=(299, 299, 3))

def calculate_fid(real_images, generated_images):
    # Calculate activations
    act1 = inception_model.predict(real_images)
    act2 = inception_model.predict(generated_images)

    # Calculate mean and covariance
    mu1, sigma1 = act1.mean(axis=0), cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), cov(act2, rowvar=False)

    # Calculate sum squared difference between means
    ssdiff = np.sum((mu1 - mu2) ** 2.0)

    # Calculate sqrt of product between covariances
    covmean = sqrtm(sigma1.dot(sigma2))

    # Check and correct imaginary numbers from sqrtm
    if iscomplexobj(covmean):
        covmean = covmean.real

    fid = ssdiff + trace(sigma1 + sigma2 - 2.0 * covmean)
    return fid
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
87910968/87910968 [==============================] - 2s 0us/step
In [ ]:
# Function to preprocess images for InceptionV3
def preprocess_images(imgs):
    imgs = tf.image.resize(imgs, (299, 299))
    imgs = preprocess_input(imgs)
    return imgs

Real Images

In [ ]:
# Load Real Images
real_datagen = ImageDataGenerator(rescale=1./255)
real_generator = real_datagen.flow_from_directory(
    subset_dir,
    target_size=(299, 299),  # Resize to 299x299 for InceptionV3
    batch_size=600,  # Load all 600 real images (100 per category)
    class_mode=None,
    shuffle=True
)
real_images = next(real_generator)
Found 628 images belonging to 6 classes.

Generated Images

In [ ]:
# Generate Images using VAE
def generate_images(model, latent_dim, num_images):
    z_sample = np.random.normal(size=(num_images, latent_dim))
    generated_images = model.predict(z_sample)
    return generated_images

generated_images = generate_images(decoder, latent_dim, 600)
generated_images_resized = tf.image.resize(generated_images, (299, 299))
19/19 [==============================] - 6s 327ms/step

Preprocess Real and Generated images

In [ ]:
# Preprocess images for InceptionV3
real_images_preprocessed = preprocess_input(real_images)
generated_images_preprocessed = preprocess_input(generated_images_resized)
In [ ]:
# Calculate FID
fid = calculate_fid(real_images_preprocessed, generated_images_preprocessed)
print('FID:', fid)
19/19 [==============================] - 155s 8s/step
19/19 [==============================] - 152s 8s/step
FID: 1.3319541321978917e+53
In [ ]:
# Ensure generated images are in the correct format (299x299x3) for InceptionV3
generated_images_resized_inception = np.array(generated_images_resized)
In [ ]:
# Ensure images are preprocessed for InceptionV3 model
def preprocess_images(images):
    images = preprocess_input(images)
    return images
In [ ]:
# Preprocess generated images
generated_images_preprocessed = preprocess_images(generated_images_resized_inception)

# Predict class probabilities for generated images
preds = inception_model.predict(generated_images_preprocessed)
19/19 [==============================] - 158s 8s/step
In [ ]:
def calculate_inception_score(preds, num_splits=10):
    scores = []
    split_size = preds.shape[0] // num_splits

    for i in range(num_splits):
        split_preds = preds[i * split_size:(i + 1) * split_size]
        p_yx = np.mean(split_preds, axis=0)  # Average class probabilities across the split
        scores.append(entropy(p_yx, base=2))  # Compute entropy of the average probabilities

    inception_score = np.exp(np.mean(scores))
    return inception_score
In [ ]:
# Calculate Inception Score
inception_score = calculate_inception_score(preds)
print('Inception Score:', inception_score)
Inception Score: 5902.863071669922
In [ ]:
print('FID:', fid)
FID: 1.3319541321978917e+53

Generate Images¶

In [ ]:
# Define the categories
categories = ['Angry', 'Surprise', 'Sad', 'Ahegao', 'Happy', 'Neutral']

# Generate one image for each category
def generate_images_per_category(model, latent_dim, num_images=1):
    generated_images = []
    for _ in range(num_images * len(categories)):
        z_sample = np.random.normal(size=(1, latent_dim))
        generated_image = model.predict(z_sample)
        generated_images.append(generated_image[0])
    return np.array(generated_images)
In [ ]:
# Generate images
generated_images = generate_images_per_category(decoder, latent_dim, num_images=1)
1/1 [==============================] - 0s 76ms/step
1/1 [==============================] - 0s 60ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 55ms/step
1/1 [==============================] - 0s 56ms/step
1/1 [==============================] - 0s 64ms/step
In [ ]:
# Plot generated images
def plot_generated_images(images, categories):
    plt.figure(figsize=(20, 10))
    for i, img in enumerate(images):
        plt.subplot(1, len(categories), i + 1)
        plt.imshow((img * 255).astype(np.uint8))
        plt.axis('off')
        plt.title(categories[i])
    plt.show()

plot_generated_images(generated_images, categories)
In [ ]:
# Define the categories
categories = ['Angry', 'Surprise', 'Sad', 'Ahegao', 'Happy', 'Neutral']

# Function to generate and save faces
def save_generated_faces(model, latent_dim, categories, save_dir='/content/generated_faces'):
    os.makedirs(save_dir, exist_ok=True)
    for i, category in enumerate(categories):
        z_sample = np.random.normal(size=(1, latent_dim))
        generated_image = model.predict(z_sample)
        generated_image = (generated_image[0] * 255).astype(np.uint8)
        plt.imsave(os.path.join(save_dir, f'{category}.png'), generated_image)
save_generated_faces(decoder, latent_dim, categories)
1/1 [==============================] - 0s 59ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 59ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 61ms/step
In [ ]:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

# Example VAE definition
latent_dim = 2

# Encoder
encoder_inputs = tf.keras.Input(shape=(28, 28, 1))
x = layers.Flatten()(encoder_inputs)
x = layers.Dense(128, activation='relu')(x)
z_mean = layers.Dense(latent_dim, name='z_mean')(x)
z_log_var = layers.Dense(latent_dim, name='z_log_var')(x)

# Sampling layer
def sampling(args):
    z_mean, z_log_var = args
    batch = tf.shape(z_mean)[0]
    dim = tf.shape(z_mean)[1]
    epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon

z = layers.Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

encoder = models.Model(encoder_inputs, [z_mean, z_log_var, z], name='encoder')

# Decoder
decoder_inputs = tf.keras.Input(shape=(latent_dim,))
x = layers.Dense(128, activation='relu')(decoder_inputs)
x = layers.Dense(28 * 28, activation='sigmoid')(x)
decoder_outputs = layers.Reshape((28, 28, 1))(x)

decoder = models.Model(decoder_inputs, decoder_outputs, name='decoder')

# VAE
outputs = decoder(encoder(encoder_inputs)[2])
vae = models.Model(encoder_inputs, outputs, name='vae')

# Compile and train the VAE
vae.compile(optimizer='adam', loss='binary_crossentropy')
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1) / 255.0
vae.fit(x_train, x_train, epochs=10, batch_size=32)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 [==============================] - 0s 0us/step
Epoch 1/10
1875/1875 [==============================] - 18s 9ms/step - loss: 0.2306
Epoch 2/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.2022
Epoch 3/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1964
Epoch 4/10
1875/1875 [==============================] - 16s 9ms/step - loss: 0.1929
Epoch 5/10
1875/1875 [==============================] - 16s 9ms/step - loss: 0.1905
Epoch 6/10
1875/1875 [==============================] - 18s 10ms/step - loss: 0.1889
Epoch 7/10
1875/1875 [==============================] - 18s 9ms/step - loss: 0.1875
Epoch 8/10
1875/1875 [==============================] - 18s 9ms/step - loss: 0.1865
Epoch 9/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1856
Epoch 10/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1847
Out[ ]:
<keras.src.callbacks.History at 0x7869b3c3f460>
In [ ]:
vae.save('vae_model.h5')
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3103: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
  saving_api.save_model(
In [ ]:
encoder.save('encoder_model.h5')
decoder.save('decoder_model.h5')
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
In [ ]:
from google.colab import files

# Download the VAE model
files.download('vae_model.h5')

# Download the encoder and decoder models
files.download('encoder_model.h5')
files.download('decoder_model.h5')
In [ ]:
pip install streamlit
Collecting streamlit
  Downloading streamlit-1.36.0-py2.py3-none-any.whl (8.6 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 8.6/8.6 MB 13.1 MB/s eta 0:00:00
Requirement already satisfied: altair<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.2.2)
Requirement already satisfied: blinker<2,>=1.0.0 in /usr/lib/python3/dist-packages (from streamlit) (1.4)
Requirement already satisfied: cachetools<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (5.4.0)
Requirement already satisfied: click<9,>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.1.7)
Requirement already satisfied: numpy<3,>=1.20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (1.25.2)
Requirement already satisfied: packaging<25,>=20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (24.1)
Requirement already satisfied: pandas<3,>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.0.3)
Requirement already satisfied: pillow<11,>=7.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (9.4.0)
Requirement already satisfied: protobuf<6,>=3.20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (3.20.3)
Requirement already satisfied: pyarrow>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (14.0.2)
Requirement already satisfied: requests<3,>=2.27 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.31.0)
Requirement already satisfied: rich<14,>=10.14.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (13.7.1)
Requirement already satisfied: tenacity<9,>=8.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.5.0)
Requirement already satisfied: toml<2,>=0.10.1 in /usr/local/lib/python3.10/dist-packages (from streamlit) (0.10.2)
Requirement already satisfied: typing-extensions<5,>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.12.2)
Collecting gitpython!=3.1.19,<4,>=3.0.7 (from streamlit)
  Downloading GitPython-3.1.43-py3-none-any.whl (207 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 207.3/207.3 kB 16.1 MB/s eta 0:00:00
Collecting pydeck<1,>=0.8.0b4 (from streamlit)
  Downloading pydeck-0.9.1-py2.py3-none-any.whl (6.9 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.9/6.9 MB 21.7 MB/s eta 0:00:00
Requirement already satisfied: tornado<7,>=6.0.3 in /usr/local/lib/python3.10/dist-packages (from streamlit) (6.3.3)
Collecting watchdog<5,>=2.1.5 (from streamlit)
  Downloading watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl (83 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 83.0/83.0 kB 7.4 MB/s eta 0:00:00
Requirement already satisfied: entrypoints in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.4)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (3.1.4)
Requirement already satisfied: jsonschema>=3.0 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (4.19.2)
Requirement already satisfied: toolz in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.12.1)
Collecting gitdb<5,>=4.0.1 (from gitpython!=3.1.19,<4,>=3.0.7->streamlit)
  Downloading gitdb-4.0.11-py3-none-any.whl (62 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 62.7/62.7 kB 3.4 MB/s eta 0:00:00
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2023.4)
Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2024.1)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2.0.7)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2024.7.4)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (2.16.1)
Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit)
  Downloading smmap-5.0.1-py3-none-any.whl (24 kB)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->altair<6,>=4.0->streamlit) (2.1.5)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (23.2.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (2023.12.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.35.1)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.19.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit) (0.1.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas<3,>=1.3.0->streamlit) (1.16.0)
Installing collected packages: watchdog, smmap, pydeck, gitdb, gitpython, streamlit
Successfully installed gitdb-4.0.11 gitpython-3.1.43 pydeck-0.9.1 smmap-5.0.1 streamlit-1.36.0 watchdog-4.0.1
In [ ]:
import streamlit as st
import numpy as np
from keras.models import load_model
import matplotlib.pyplot as plt
import io
In [ ]:
# Load your trained VAE model
model = load_model('/content/vae_model.h5')

# Function to encode the keyword into a numerical format
def encode_keyword(keyword, vocab):
    encoded = np.zeros(len(vocab))
    if keyword in vocab:
        encoded[vocab.index(keyword)] = 1
    return encoded

# Example vocabulary
vocab = ['happy', 'sad', 'angry', 'surprised', 'neutral']

# Streamlit app
st.title("Image Generation using VAE")

keyword = st.selectbox('Select a keyword:', vocab)
generate_button = st.button('Generate Image')

if generate_button:
    condition = encode_keyword(keyword, vocab)

    # Generate random noise
    noise = np.random.normal(0, 1, (1, 100))  # Adjust the dimensions according to your model input

    # Combine noise and condition
    input_vector = np.concatenate([noise, condition.reshape(1, -1)], axis=1)
2024-07-22 02:03:25.305 
  Warning: to view this Streamlit app on a browser, run it with the following
  command:

    streamlit run /usr/local/lib/python3.10/dist-packages/colab_kernel_launcher.py [ARGUMENTS]
2024-07-22 02:03:25.316 Session state does not function when running a script without `streamlit run`
In [ ]:
!pip install streamlit numpy keras matplotlib
Requirement already satisfied: streamlit in /usr/local/lib/python3.10/dist-packages (1.36.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (1.25.2)
Requirement already satisfied: keras in /usr/local/lib/python3.10/dist-packages (2.15.0)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (3.7.1)
Requirement already satisfied: altair<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.2.2)
Requirement already satisfied: blinker<2,>=1.0.0 in /usr/lib/python3/dist-packages (from streamlit) (1.4)
Requirement already satisfied: cachetools<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (5.4.0)
Requirement already satisfied: click<9,>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.1.7)
Requirement already satisfied: packaging<25,>=20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (24.1)
Requirement already satisfied: pandas<3,>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.0.3)
Requirement already satisfied: pillow<11,>=7.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (9.4.0)
Requirement already satisfied: protobuf<6,>=3.20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (3.20.3)
Requirement already satisfied: pyarrow>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (14.0.2)
Requirement already satisfied: requests<3,>=2.27 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.31.0)
Requirement already satisfied: rich<14,>=10.14.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (13.7.1)
Requirement already satisfied: tenacity<9,>=8.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.5.0)
Requirement already satisfied: toml<2,>=0.10.1 in /usr/local/lib/python3.10/dist-packages (from streamlit) (0.10.2)
Requirement already satisfied: typing-extensions<5,>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.12.2)
Requirement already satisfied: gitpython!=3.1.19,<4,>=3.0.7 in /usr/local/lib/python3.10/dist-packages (from streamlit) (3.1.43)
Requirement already satisfied: pydeck<1,>=0.8.0b4 in /usr/local/lib/python3.10/dist-packages (from streamlit) (0.9.1)
Requirement already satisfied: tornado<7,>=6.0.3 in /usr/local/lib/python3.10/dist-packages (from streamlit) (6.3.3)
Requirement already satisfied: watchdog<5,>=2.1.5 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.0.1)
Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.2.1)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (0.12.1)
Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (4.53.1)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.4.5)
Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (3.1.2)
Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (2.8.2)
Requirement already satisfied: entrypoints in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.4)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (3.1.4)
Requirement already satisfied: jsonschema>=3.0 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (4.19.2)
Requirement already satisfied: toolz in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.12.1)
Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.10/dist-packages (from gitpython!=3.1.19,<4,>=3.0.7->streamlit) (4.0.11)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2023.4)
Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2024.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2.0.7)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2024.7.4)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (2.16.1)
Requirement already satisfied: smmap<6,>=3.0.1 in /usr/local/lib/python3.10/dist-packages (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit) (5.0.1)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->altair<6,>=4.0->streamlit) (2.1.5)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (23.2.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (2023.12.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.35.1)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.19.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit) (0.1.2)
In [ ]:
# Write the Streamlit app to a file
%%writefile app.py
import streamlit as st
import numpy as np
from keras.models import load_model
import matplotlib.pyplot as plt
import io
Writing app.py
In [ ]:
# Load your trained VAE model
model = load_model('/content/vae_model.h5')
In [ ]:
# Function to encode the keyword into a numerical format
def encode_keyword(keyword, vocab):
    encoded = np.zeros(len(vocab))
    if keyword in vocab:
        encoded[vocab.index(keyword)] = 1
    return encoded

# Example vocabulary
vocab = ['happy', 'sad', 'angry', 'surprised', 'neutral']

# Streamlit app
st.title("Image Generation using VAE")

keyword = st.selectbox('Select a keyword:', vocab)
generate_button = st.button('Generate Image')

if generate_button:
    condition = encode_keyword(keyword, vocab)

    # Generate random noise
    noise = np.random.normal(0, 1, (1, 100))  # Adjust the dimensions according to your model input

    # Combine noise and condition
    input_vector = np.concatenate([noise, condition.reshape(1, -1)], axis=1)

    # Generate the image
    generated_image = model.predict(input_vector)

    # Scale the pixel values to [0, 255]
    generated_image = (generated_image * 127.5 + 127.5).astype(np.uint8)
    generated_image = generated_image[0]  # Remove the batch dimension

    # Display the image
    st.image(generated_image, use_column_width=True)
In [ ]:
!pip install pyngrok
Collecting pyngrok
  Downloading pyngrok-7.2.0-py3-none-any.whl (22 kB)
Requirement already satisfied: PyYAML>=5.1 in /usr/local/lib/python3.10/dist-packages (from pyngrok) (6.0.1)
Installing collected packages: pyngrok
Successfully installed pyngrok-7.2.0
In [52]:
from pyngrok import ngrok

# Run Streamlit in the background
!streamlit run app.py &

# Set up ngrok to tunnel to the Streamlit app
public_url = ngrok.connect(port='8501')
print(f"Streamlit app is live at: {public_url}")
Collecting usage statistics. To deactivate, set browser.gatherUsageStats to false.


  You can now view your Streamlit app in your browser.

  Local URL: http://localhost:8501
  Network URL: http://172.28.0.12:8501
  External URL: http://34.42.123.165:8501

  Stopping...
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-52-ff30c2567270> in <cell line: 7>()
      5 
      6 # Set up ngrok to tunnel to the Streamlit app
----> 7 public_url = ngrok.connect(port='8501')
      8 print(f"Streamlit app is live at: {public_url}")

/usr/local/lib/python3.10/dist-packages/pyngrok/ngrok.py in connect(addr, proto, name, pyngrok_config, **options)
    314             options.pop("auth")
    315 
--> 316     api_url = get_ngrok_process(pyngrok_config).api_url
    317 
    318     logger.debug(f"Creating tunnel with options: {options}")

/usr/local/lib/python3.10/dist-packages/pyngrok/ngrok.py in get_ngrok_process(pyngrok_config)
    152         pyngrok_config = conf.get_default()
    153 
--> 154     install_ngrok(pyngrok_config)
    155 
    156     return process.get_process(pyngrok_config)

/usr/local/lib/python3.10/dist-packages/pyngrok/ngrok.py in install_ngrok(pyngrok_config)
     97 
     98     if not os.path.exists(pyngrok_config.ngrok_path):
---> 99         installer.install_ngrok(pyngrok_config.ngrok_path, ngrok_version=pyngrok_config.ngrok_version)
    100 
    101     config_path = conf.get_config_path(pyngrok_config)

/usr/local/lib/python3.10/dist-packages/pyngrok/installer.py in install_ngrok(ngrok_path, ngrok_version, **kwargs)
    122         download_path = _download_file(url, **kwargs)
    123 
--> 124         _install_ngrok_zip(ngrok_path, download_path)
    125     except Exception as e:
    126         raise PyngrokNgrokInstallError(f"An error occurred while downloading ngrok from {url}: {e}")

/usr/local/lib/python3.10/dist-packages/pyngrok/installer.py in _install_ngrok_zip(ngrok_path, zip_path)
    139     with zipfile.ZipFile(zip_path, "r") as zip_ref:
    140         logger.debug(f"Extracting ngrok binary from {zip_path} to {ngrok_path} ...")
--> 141         zip_ref.extractall(os.path.dirname(ngrok_path))
    142 
    143     os.chmod(ngrok_path, int("777", 8))

/usr/lib/python3.10/zipfile.py in extractall(self, path, members, pwd)
   1657 
   1658         for zipinfo in members:
-> 1659             self._extract_member(zipinfo, path, pwd)
   1660 
   1661     @classmethod

/usr/lib/python3.10/zipfile.py in _extract_member(self, member, targetpath, pwd)
   1712         with self.open(member, pwd=pwd) as source, \
   1713              open(targetpath, "wb") as target:
-> 1714             shutil.copyfileobj(source, target)
   1715 
   1716         return targetpath

/usr/lib/python3.10/shutil.py in copyfileobj(fsrc, fdst, length)
    196         if not buf:
    197             break
--> 198         fdst_write(buf)
    199 
    200 def _samefile(src, dst):

KeyboardInterrupt: 
In [53]:
# Install nbconvert if not already installed
!pip install nbconvert

# Convert the notebook to HTML (Replace 'your_notebook.ipynb' with your actual notebook name)
!jupyter nbconvert --to html '/content/Vae.ipynb'

# List the files in the current directory to confirm the HTML file creation
!ls /content

# Optional: Display the HTML file in Colab
from IPython.display import HTML
with open('/content/Vae.html', 'r') as f:
    html_data = f.read()
HTML(html_data)
Requirement already satisfied: nbconvert in /usr/local/lib/python3.10/dist-packages (6.5.4)
Requirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from nbconvert) (4.9.4)
Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (4.12.3)
Requirement already satisfied: bleach in /usr/local/lib/python3.10/dist-packages (from nbconvert) (6.1.0)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.10/dist-packages (from nbconvert) (0.7.1)
Requirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (0.4)
Requirement already satisfied: jinja2>=3.0 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (3.1.4)
Requirement already satisfied: jupyter-core>=4.7 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (5.7.2)
Requirement already satisfied: jupyterlab-pygments in /usr/local/lib/python3.10/dist-packages (from nbconvert) (0.3.0)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (2.1.5)
Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (0.8.4)
Requirement already satisfied: nbclient>=0.5.0 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (0.10.0)
Requirement already satisfied: nbformat>=5.1 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (5.10.4)
Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from nbconvert) (24.1)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (1.5.1)
Requirement already satisfied: pygments>=2.4.1 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (2.16.1)
Requirement already satisfied: tinycss2 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (1.3.0)
Requirement already satisfied: traitlets>=5.0 in /usr/local/lib/python3.10/dist-packages (from nbconvert) (5.7.1)
Requirement already satisfied: platformdirs>=2.5 in /usr/local/lib/python3.10/dist-packages (from jupyter-core>=4.7->nbconvert) (4.2.2)
Requirement already satisfied: jupyter-client>=6.1.12 in /usr/local/lib/python3.10/dist-packages (from nbclient>=0.5.0->nbconvert) (6.1.12)
Requirement already satisfied: fastjsonschema>=2.15 in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.1->nbconvert) (2.20.0)
Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.1->nbconvert) (4.19.2)
Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.10/dist-packages (from beautifulsoup4->nbconvert) (2.5)
Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from bleach->nbconvert) (1.16.0)
Requirement already satisfied: webencodings in /usr/local/lib/python3.10/dist-packages (from bleach->nbconvert) (0.5.1)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.1->nbconvert) (23.2.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.1->nbconvert) (2023.12.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.1->nbconvert) (0.35.1)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.1->nbconvert) (0.19.0)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.10/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (24.0.1)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.10/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (2.8.2)
Requirement already satisfied: tornado>=4.1 in /usr/local/lib/python3.10/dist-packages (from jupyter-client>=6.1.12->nbclient>=0.5.0->nbconvert) (6.3.3)
[NbConvertApp] Converting notebook /content/Vae.ipynb to html
[NbConvertApp] ERROR | Notebook JSON is invalid: Additional properties are not allowed ('metadata' was unexpected)

Failed validating 'additionalProperties' in stream:

On instance['cells'][66]['outputs'][0]:
{'metadata': {'tags': None},
 'name': 'stdout',
 'output_type': 'stream',
 'text': '\n'
         'Collecting usage statistics. To deactivate, set browser.gatherU...'}
[NbConvertApp] Writing 5408784 bytes to /content/Vae.html
app.py		  drive		    generated_faces  subset_dataset  Vae.ipynb
decoder_model.h5  encoder_model.h5  sample_data      Vae.html	     vae_model.h5
Out[53]:
Vae

Generated Image¶

In [ ]:

Importing Libraries¶

In [ ]:
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.keras import layers, models, backend as K
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import shutil
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from scipy.linalg import sqrtm
from numpy import cov, trace, iscomplexobj
from numpy.random import random
from scipy.stats import entropy
from IPython.display import Image
In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive

Mounting the Google Drive¶

In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).

Reading the images from the drive¶

In [ ]:
base_dir = '/content/drive/MyDrive/FaceExpressions/dataset'
categories = ['Angry', 'Surprise', 'Sad', 'Ahegao', 'Happy', 'Neutral']

for category in categories:
    print(f'{category}:')
    path = os.path.join(base_dir, category)
    print(os.listdir(path)[:5])  # List first 5 files in each category
Angry:
['295f94b25bd4406ecebe9791a9531b38c6e5ce69fd37ead8f4f9ffdb~angry.png', '4b33a2f540fd37977aca8ede305989e23e76738f89c1c16ca9a2a620~angry.jpg', '4fd88a0a743c84a624347e2c761bcd7015a929d31b40f9a36e7ee259~angry.png', 'cropped_emotions.231374~angry.png', 'cropped_emotions.158081~angry.png']
Surprise:
['cropped_emotions.100270~12fffff.png', 'cropped_emotions.100276~12fffff.png', 'cropped_emotions.100279~12fffff.png', 'cropped_emotions.100207~12fffff.png', 'cropped_emotions.100218~12fffff.png']
Sad:
['cropped_emotions.163492.png', 'cropped_emotions.164034.png', 'cropped_emotions.153406.png', 'cropped_emotions.164233.png', 'cropped_emotions.164347.png']
Ahegao:
['cropped_emotions.189154~ahegao.png', 'cropped_emotions.190427~ahegao.png', 'lol140~ahegao.png', 'lol795~ahegao.png']
Happy:
[]
Neutral:
['0b2f9d06b850023dae03f02fc6c56a6b4b3684b3b63331838edc91ddf.jpg', '0b195e0b51a44f69d06de0dbfb1893df73769c6b065c2574ff3693cff.jpg', '0b3161a4ccc3aa541df0a73f00d106ff0912c0bb9e4a0642e9bac59af.jpg', '0b0b6d01330839782219ab3d938414cf5efeec8cd79aa2cc3d9a9e95f.jpg', '0b00131d449254c09c9b2c3b295ec677940f889a3aac6a90e6982a5bf.jpg']

Displaying images form each folder¶

In [ ]:
# Function to display images
def display_images(category, image_files):
    plt.figure(figsize=(20, 10))
    for i, img_file in enumerate(image_files):
        img_path = os.path.join(base_dir, category, img_file)
        img = mpimg.imread(img_path)
        plt.subplot(1, 5, i + 1)
        plt.imshow(img)
        plt.axis('off')
        plt.title(category)
    plt.show()

# Iterate through each category and display the first 5 images
for category in categories:
    path = os.path.join(base_dir, category)
    image_files = os.listdir(path)[:5]
    print(f'Displaying first 5 images from category: {category}')
    display_images(category, image_files)
Displaying first 5 images from category: Angry
Displaying first 5 images from category: Surprise
Displaying first 5 images from category: Sad
Displaying first 5 images from category: Ahegao
Displaying first 5 images from category: Happy
<Figure size 2000x1000 with 0 Axes>
Displaying first 5 images from category: Neutral

Load Images using "ImageDataGenerator" from keras.preprocessing.image¶

In [ ]:
datagen = ImageDataGenerator(rescale=1./255)

image_size = (128, 128)  # Resize images to 128x128

train_generator = datagen.flow_from_directory(
    base_dir,
    target_size=image_size,
    batch_size=32,
    class_mode='input',  # Input because we are using these images for unsupervised learning
    shuffle=True
)
Found 6793 images belonging to 6 classes.

Creating a subset folder¶

In [ ]:
# Create a new directory for the subset of images
subset_dir = '/content/subset_dataset'
os.makedirs(subset_dir, exist_ok=True)

Loading sample images¶

In [ ]:
# Copy 100 images from each category to the new directory
for category in categories:
    category_dir = os.path.join(base_dir, category)
    subset_category_dir = os.path.join(subset_dir, category)
    os.makedirs(subset_category_dir, exist_ok=True)

    image_files = os.listdir(category_dir)[:200]
    for img_file in image_files:
        shutil.copy(os.path.join(category_dir, img_file), subset_category_dir)
In [ ]:
# Verify the new dataset
for category in categories:
    subset_category_dir = os.path.join(subset_dir, category)
    print(f'{category}: {len(os.listdir(subset_category_dir))} images')
Angry: 24 images
Surprise: 200 images
Sad: 200 images
Ahegao: 4 images
Happy: 0 images
Neutral: 200 images
In [ ]:
# Define image size and latent dimension
image_size = (128, 128)
latent_dim = 2

Loading the sample images¶

In [ ]:
# Data Generator
datagen = ImageDataGenerator(rescale=1./255)
train_generator = datagen.flow_from_directory(
    subset_dir,
    target_size=image_size,
    batch_size=32,
    class_mode='input',
    shuffle=True
)
Found 628 images belonging to 6 classes.

Building the Variational Autoencoder¶

Encoder¶

In [ ]:
# Encoder
encoder_inputs = tf.keras.Input(shape=(image_size[0], image_size[1], 3))
x = layers.Conv2D(32, 3, activation='relu', padding='same')(encoder_inputs)
x = layers.MaxPooling2D()(x)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
x = layers.MaxPooling2D()(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='relu')(x)
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim)(x)
In [ ]:
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

z = layers.Lambda(sampling)([z_mean, z_log_var])
In [ ]:
encoder = tf.keras.Model(encoder_inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
Model: "encoder"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_1 (InputLayer)        [(None, 128, 128, 3)]        0         []                            
                                                                                                  
 conv2d (Conv2D)             (None, 128, 128, 32)         896       ['input_1[0][0]']             
                                                                                                  
 max_pooling2d (MaxPooling2  (None, 64, 64, 32)           0         ['conv2d[0][0]']              
 D)                                                                                               
                                                                                                  
 conv2d_1 (Conv2D)           (None, 64, 64, 64)           18496     ['max_pooling2d[0][0]']       
                                                                                                  
 max_pooling2d_1 (MaxPoolin  (None, 32, 32, 64)           0         ['conv2d_1[0][0]']            
 g2D)                                                                                             
                                                                                                  
 flatten (Flatten)           (None, 65536)                0         ['max_pooling2d_1[0][0]']     
                                                                                                  
 dense (Dense)               (None, 128)                  8388736   ['flatten[0][0]']             
                                                                                                  
 dense_1 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 dense_2 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 lambda (Lambda)             (None, 2)                    0         ['dense_1[0][0]',             
                                                                     'dense_2[0][0]']             
                                                                                                  
==================================================================================================
Total params: 8408644 (32.08 MB)
Trainable params: 8408644 (32.08 MB)
Non-trainable params: 0 (0.00 Byte)
__________________________________________________________________________________________________

Decoder¶

In [ ]:
# Decoder
latent_inputs = tf.keras.Input(shape=(latent_dim,))
x = layers.Dense(32 * 32 * 64, activation='relu')(latent_inputs)
x = layers.Reshape((32, 32, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation='relu', padding='same')(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2DTranspose(32, 3, activation='relu', padding='same')(x)
x = layers.UpSampling2D()(x)
decoder_outputs = layers.Conv2DTranspose(3, 3, activation='sigmoid', padding='same')(x)
In [ ]:
decoder = tf.keras.Model(latent_inputs, decoder_outputs, name='decoder')
decoder.summary()
Model: "decoder"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_2 (InputLayer)        [(None, 2)]               0         
                                                                 
 dense_3 (Dense)             (None, 65536)             196608    
                                                                 
 reshape (Reshape)           (None, 32, 32, 64)        0         
                                                                 
 conv2d_transpose (Conv2DTr  (None, 32, 32, 64)        36928     
 anspose)                                                        
                                                                 
 up_sampling2d (UpSampling2  (None, 64, 64, 64)        0         
 D)                                                              
                                                                 
 conv2d_transpose_1 (Conv2D  (None, 64, 64, 32)        18464     
 Transpose)                                                      
                                                                 
 up_sampling2d_1 (UpSamplin  (None, 128, 128, 32)      0         
 g2D)                                                            
                                                                 
 conv2d_transpose_2 (Conv2D  (None, 128, 128, 3)       867       
 Transpose)                                                      
                                                                 
=================================================================
Total params: 252867 (987.76 KB)
Trainable params: 252867 (987.76 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
In [ ]:
# VAE model
outputs = decoder(encoder(encoder_inputs)[2])
vae = tf.keras.Model(encoder_inputs, outputs, name='vae')
In [ ]:
# VAE loss
reconstruction_loss = tf.keras.losses.binary_crossentropy(K.flatten(encoder_inputs), K.flatten(outputs))
reconstruction_loss *= image_size[0] * image_size[1]
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
vae.summary()
Model: "vae"
__________________________________________________________________________________________________
 Layer (type)                Output Shape                 Param #   Connected to                  
==================================================================================================
 input_1 (InputLayer)        [(None, 128, 128, 3)]        0         []                            
                                                                                                  
 encoder (Functional)        [(None, 2),                  8408644   ['input_1[0][0]']             
                              (None, 2),                                                          
                              (None, 2)]                                                          
                                                                                                  
 decoder (Functional)        (None, 128, 128, 3)          252867    ['encoder[0][2]']             
                                                                                                  
 conv2d (Conv2D)             (None, 128, 128, 32)         896       ['input_1[0][0]']             
                                                                                                  
 max_pooling2d (MaxPooling2  (None, 64, 64, 32)           0         ['conv2d[0][0]']              
 D)                                                                                               
                                                                                                  
 conv2d_1 (Conv2D)           (None, 64, 64, 64)           18496     ['max_pooling2d[0][0]']       
                                                                                                  
 max_pooling2d_1 (MaxPoolin  (None, 32, 32, 64)           0         ['conv2d_1[0][0]']            
 g2D)                                                                                             
                                                                                                  
 flatten (Flatten)           (None, 65536)                0         ['max_pooling2d_1[0][0]']     
                                                                                                  
 dense (Dense)               (None, 128)                  8388736   ['flatten[0][0]']             
                                                                                                  
 dense_2 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 dense_1 (Dense)             (None, 2)                    258       ['dense[0][0]']               
                                                                                                  
 tf.reshape (TFOpLambda)     (None,)                      0         ['input_1[0][0]']             
                                                                                                  
 tf.reshape_1 (TFOpLambda)   (None,)                      0         ['decoder[0][0]']             
                                                                                                  
 tf.__operators__.add (TFOp  (None, 2)                    0         ['dense_2[0][0]']             
 Lambda)                                                                                          
                                                                                                  
 tf.math.square (TFOpLambda  (None, 2)                    0         ['dense_1[0][0]']             
 )                                                                                                
                                                                                                  
 tf.cast (TFOpLambda)        (None,)                      0         ['tf.reshape[0][0]']          
                                                                                                  
 tf.convert_to_tensor (TFOp  (None,)                      0         ['tf.reshape_1[0][0]']        
 Lambda)                                                                                          
                                                                                                  
 tf.math.subtract (TFOpLamb  (None, 2)                    0         ['tf.__operators__.add[0][0]',
 da)                                                                 'tf.math.square[0][0]']      
                                                                                                  
 tf.math.exp (TFOpLambda)    (None, 2)                    0         ['dense_2[0][0]']             
                                                                                                  
 tf.keras.backend.binary_cr  (None,)                      0         ['tf.cast[0][0]',             
 ossentropy (TFOpLambda)                                             'tf.convert_to_tensor[0][0]']
                                                                                                  
 tf.math.subtract_1 (TFOpLa  (None, 2)                    0         ['tf.math.subtract[0][0]',    
 mbda)                                                               'tf.math.exp[0][0]']         
                                                                                                  
 tf.math.reduce_mean (TFOpL  ()                           0         ['tf.keras.backend.binary_cros
 ambda)                                                             sentropy[0][0]']              
                                                                                                  
 tf.math.reduce_sum (TFOpLa  (None,)                      0         ['tf.math.subtract_1[0][0]']  
 mbda)                                                                                            
                                                                                                  
 tf.math.multiply (TFOpLamb  ()                           0         ['tf.math.reduce_mean[0][0]'] 
 da)                                                                                              
                                                                                                  
 tf.math.multiply_1 (TFOpLa  (None,)                      0         ['tf.math.reduce_sum[0][0]']  
 mbda)                                                                                            
                                                                                                  
 tf.__operators__.add_1 (TF  (None,)                      0         ['tf.math.multiply[0][0]',    
 OpLambda)                                                           'tf.math.multiply_1[0][0]']  
                                                                                                  
 tf.math.reduce_mean_1 (TFO  ()                           0         ['tf.__operators__.add_1[0][0]
 pLambda)                                                           ']                            
                                                                                                  
 add_loss (AddLoss)          ()                           0         ['tf.math.reduce_mean_1[0][0]'
                                                                    ]                             
                                                                                                  
==================================================================================================
Total params: 8661511 (33.04 MB)
Trainable params: 8661511 (33.04 MB)
Non-trainable params: 0 (0.00 Byte)
__________________________________________________________________________________________________

Training¶

In [ ]:
# Training
epochs = 10
vae.fit(train_generator, epochs=epochs)
Epoch 1/10
20/20 [==============================] - 77s 3s/step - loss: 11084.1191
Epoch 2/10
20/20 [==============================] - 65s 3s/step - loss: 10786.1504
Epoch 3/10
20/20 [==============================] - 64s 3s/step - loss: 10473.9512
Epoch 4/10
20/20 [==============================] - 62s 3s/step - loss: 10304.8027
Epoch 5/10
20/20 [==============================] - 65s 3s/step - loss: 10274.2529
Epoch 6/10
20/20 [==============================] - 66s 3s/step - loss: 10219.9023
Epoch 7/10
20/20 [==============================] - 65s 3s/step - loss: 10183.4873
Epoch 8/10
20/20 [==============================] - 65s 3s/step - loss: 10173.6689
Epoch 9/10
20/20 [==============================] - 72s 4s/step - loss: 10175.3145
Epoch 10/10
20/20 [==============================] - 65s 3s/step - loss: 10160.9453
Out[ ]:
<keras.src.callbacks.History at 0x7869c07954e0>

Model Evaluation¶

In [ ]:
# Load pretrained InceptionV3 model
inception_model = InceptionV3(include_top=False, pooling='avg', input_shape=(299, 299, 3))

def calculate_fid(real_images, generated_images):
    # Calculate activations
    act1 = inception_model.predict(real_images)
    act2 = inception_model.predict(generated_images)

    # Calculate mean and covariance
    mu1, sigma1 = act1.mean(axis=0), cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), cov(act2, rowvar=False)

    # Calculate sum squared difference between means
    ssdiff = np.sum((mu1 - mu2) ** 2.0)

    # Calculate sqrt of product between covariances
    covmean = sqrtm(sigma1.dot(sigma2))

    # Check and correct imaginary numbers from sqrtm
    if iscomplexobj(covmean):
        covmean = covmean.real

    fid = ssdiff + trace(sigma1 + sigma2 - 2.0 * covmean)
    return fid
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
87910968/87910968 [==============================] - 2s 0us/step
In [ ]:
# Function to preprocess images for InceptionV3
def preprocess_images(imgs):
    imgs = tf.image.resize(imgs, (299, 299))
    imgs = preprocess_input(imgs)
    return imgs

Real Images

In [ ]:
# Load Real Images
real_datagen = ImageDataGenerator(rescale=1./255)
real_generator = real_datagen.flow_from_directory(
    subset_dir,
    target_size=(299, 299),  # Resize to 299x299 for InceptionV3
    batch_size=600,  # Load all 600 real images (100 per category)
    class_mode=None,
    shuffle=True
)
real_images = next(real_generator)
Found 628 images belonging to 6 classes.

Generated Images

In [ ]:
# Generate Images using VAE
def generate_images(model, latent_dim, num_images):
    z_sample = np.random.normal(size=(num_images, latent_dim))
    generated_images = model.predict(z_sample)
    return generated_images

generated_images = generate_images(decoder, latent_dim, 600)
generated_images_resized = tf.image.resize(generated_images, (299, 299))
19/19 [==============================] - 6s 327ms/step

Preprocess Real and Generated images

In [ ]:
# Preprocess images for InceptionV3
real_images_preprocessed = preprocess_input(real_images)
generated_images_preprocessed = preprocess_input(generated_images_resized)
In [ ]:
# Calculate FID
fid = calculate_fid(real_images_preprocessed, generated_images_preprocessed)
print('FID:', fid)
19/19 [==============================] - 155s 8s/step
19/19 [==============================] - 152s 8s/step
FID: 1.3319541321978917e+53
In [ ]:
# Ensure generated images are in the correct format (299x299x3) for InceptionV3
generated_images_resized_inception = np.array(generated_images_resized)
In [ ]:
# Ensure images are preprocessed for InceptionV3 model
def preprocess_images(images):
    images = preprocess_input(images)
    return images
In [ ]:
# Preprocess generated images
generated_images_preprocessed = preprocess_images(generated_images_resized_inception)

# Predict class probabilities for generated images
preds = inception_model.predict(generated_images_preprocessed)
19/19 [==============================] - 158s 8s/step
In [ ]:
def calculate_inception_score(preds, num_splits=10):
    scores = []
    split_size = preds.shape[0] // num_splits

    for i in range(num_splits):
        split_preds = preds[i * split_size:(i + 1) * split_size]
        p_yx = np.mean(split_preds, axis=0)  # Average class probabilities across the split
        scores.append(entropy(p_yx, base=2))  # Compute entropy of the average probabilities

    inception_score = np.exp(np.mean(scores))
    return inception_score
In [ ]:
# Calculate Inception Score
inception_score = calculate_inception_score(preds)
print('Inception Score:', inception_score)
Inception Score: 5902.863071669922
In [ ]:
print('FID:', fid)
FID: 1.3319541321978917e+53

Generate Images¶

In [ ]:
# Define the categories
categories = ['Angry', 'Surprise', 'Sad', 'Ahegao', 'Happy', 'Neutral']

# Generate one image for each category
def generate_images_per_category(model, latent_dim, num_images=1):
    generated_images = []
    for _ in range(num_images * len(categories)):
        z_sample = np.random.normal(size=(1, latent_dim))
        generated_image = model.predict(z_sample)
        generated_images.append(generated_image[0])
    return np.array(generated_images)
In [ ]:
# Generate images
generated_images = generate_images_per_category(decoder, latent_dim, num_images=1)
1/1 [==============================] - 0s 76ms/step
1/1 [==============================] - 0s 60ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 55ms/step
1/1 [==============================] - 0s 56ms/step
1/1 [==============================] - 0s 64ms/step
In [ ]:
# Plot generated images
def plot_generated_images(images, categories):
    plt.figure(figsize=(20, 10))
    for i, img in enumerate(images):
        plt.subplot(1, len(categories), i + 1)
        plt.imshow((img * 255).astype(np.uint8))
        plt.axis('off')
        plt.title(categories[i])
    plt.show()

plot_generated_images(generated_images, categories)
In [ ]:
# Define the categories
categories = ['Angry', 'Surprise', 'Sad', 'Ahegao', 'Happy', 'Neutral']

# Function to generate and save faces
def save_generated_faces(model, latent_dim, categories, save_dir='/content/generated_faces'):
    os.makedirs(save_dir, exist_ok=True)
    for i, category in enumerate(categories):
        z_sample = np.random.normal(size=(1, latent_dim))
        generated_image = model.predict(z_sample)
        generated_image = (generated_image[0] * 255).astype(np.uint8)
        plt.imsave(os.path.join(save_dir, f'{category}.png'), generated_image)
save_generated_faces(decoder, latent_dim, categories)
1/1 [==============================] - 0s 59ms/step
1/1 [==============================] - 0s 51ms/step
1/1 [==============================] - 0s 59ms/step
1/1 [==============================] - 0s 52ms/step
1/1 [==============================] - 0s 50ms/step
1/1 [==============================] - 0s 61ms/step
In [ ]:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

# Example VAE definition
latent_dim = 2

# Encoder
encoder_inputs = tf.keras.Input(shape=(28, 28, 1))
x = layers.Flatten()(encoder_inputs)
x = layers.Dense(128, activation='relu')(x)
z_mean = layers.Dense(latent_dim, name='z_mean')(x)
z_log_var = layers.Dense(latent_dim, name='z_log_var')(x)

# Sampling layer
def sampling(args):
    z_mean, z_log_var = args
    batch = tf.shape(z_mean)[0]
    dim = tf.shape(z_mean)[1]
    epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
    return z_mean + tf.exp(0.5 * z_log_var) * epsilon

z = layers.Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

encoder = models.Model(encoder_inputs, [z_mean, z_log_var, z], name='encoder')

# Decoder
decoder_inputs = tf.keras.Input(shape=(latent_dim,))
x = layers.Dense(128, activation='relu')(decoder_inputs)
x = layers.Dense(28 * 28, activation='sigmoid')(x)
decoder_outputs = layers.Reshape((28, 28, 1))(x)

decoder = models.Model(decoder_inputs, decoder_outputs, name='decoder')

# VAE
outputs = decoder(encoder(encoder_inputs)[2])
vae = models.Model(encoder_inputs, outputs, name='vae')

# Compile and train the VAE
vae.compile(optimizer='adam', loss='binary_crossentropy')
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1) / 255.0
vae.fit(x_train, x_train, epochs=10, batch_size=32)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 [==============================] - 0s 0us/step
Epoch 1/10
1875/1875 [==============================] - 18s 9ms/step - loss: 0.2306
Epoch 2/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.2022
Epoch 3/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1964
Epoch 4/10
1875/1875 [==============================] - 16s 9ms/step - loss: 0.1929
Epoch 5/10
1875/1875 [==============================] - 16s 9ms/step - loss: 0.1905
Epoch 6/10
1875/1875 [==============================] - 18s 10ms/step - loss: 0.1889
Epoch 7/10
1875/1875 [==============================] - 18s 9ms/step - loss: 0.1875
Epoch 8/10
1875/1875 [==============================] - 18s 9ms/step - loss: 0.1865
Epoch 9/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1856
Epoch 10/10
1875/1875 [==============================] - 17s 9ms/step - loss: 0.1847
Out[ ]:
<keras.src.callbacks.History at 0x7869b3c3f460>
In [ ]:
vae.save('vae_model.h5')
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3103: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.
  saving_api.save_model(
In [ ]:
encoder.save('encoder_model.h5')
decoder.save('decoder_model.h5')
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
In [ ]:
from google.colab import files

# Download the VAE model
files.download('vae_model.h5')

# Download the encoder and decoder models
files.download('encoder_model.h5')
files.download('decoder_model.h5')
In [ ]:
pip install streamlit
Collecting streamlit
  Downloading streamlit-1.36.0-py2.py3-none-any.whl (8.6 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 8.6/8.6 MB 13.1 MB/s eta 0:00:00
Requirement already satisfied: altair<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.2.2)
Requirement already satisfied: blinker<2,>=1.0.0 in /usr/lib/python3/dist-packages (from streamlit) (1.4)
Requirement already satisfied: cachetools<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (5.4.0)
Requirement already satisfied: click<9,>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.1.7)
Requirement already satisfied: numpy<3,>=1.20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (1.25.2)
Requirement already satisfied: packaging<25,>=20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (24.1)
Requirement already satisfied: pandas<3,>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.0.3)
Requirement already satisfied: pillow<11,>=7.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (9.4.0)
Requirement already satisfied: protobuf<6,>=3.20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (3.20.3)
Requirement already satisfied: pyarrow>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (14.0.2)
Requirement already satisfied: requests<3,>=2.27 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.31.0)
Requirement already satisfied: rich<14,>=10.14.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (13.7.1)
Requirement already satisfied: tenacity<9,>=8.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.5.0)
Requirement already satisfied: toml<2,>=0.10.1 in /usr/local/lib/python3.10/dist-packages (from streamlit) (0.10.2)
Requirement already satisfied: typing-extensions<5,>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.12.2)
Collecting gitpython!=3.1.19,<4,>=3.0.7 (from streamlit)
  Downloading GitPython-3.1.43-py3-none-any.whl (207 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 207.3/207.3 kB 16.1 MB/s eta 0:00:00
Collecting pydeck<1,>=0.8.0b4 (from streamlit)
  Downloading pydeck-0.9.1-py2.py3-none-any.whl (6.9 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.9/6.9 MB 21.7 MB/s eta 0:00:00
Requirement already satisfied: tornado<7,>=6.0.3 in /usr/local/lib/python3.10/dist-packages (from streamlit) (6.3.3)
Collecting watchdog<5,>=2.1.5 (from streamlit)
  Downloading watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl (83 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 83.0/83.0 kB 7.4 MB/s eta 0:00:00
Requirement already satisfied: entrypoints in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.4)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (3.1.4)
Requirement already satisfied: jsonschema>=3.0 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (4.19.2)
Requirement already satisfied: toolz in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.12.1)
Collecting gitdb<5,>=4.0.1 (from gitpython!=3.1.19,<4,>=3.0.7->streamlit)
  Downloading gitdb-4.0.11-py3-none-any.whl (62 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 62.7/62.7 kB 3.4 MB/s eta 0:00:00
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2023.4)
Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2024.1)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2.0.7)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2024.7.4)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (2.16.1)
Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit)
  Downloading smmap-5.0.1-py3-none-any.whl (24 kB)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->altair<6,>=4.0->streamlit) (2.1.5)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (23.2.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (2023.12.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.35.1)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.19.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit) (0.1.2)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas<3,>=1.3.0->streamlit) (1.16.0)
Installing collected packages: watchdog, smmap, pydeck, gitdb, gitpython, streamlit
Successfully installed gitdb-4.0.11 gitpython-3.1.43 pydeck-0.9.1 smmap-5.0.1 streamlit-1.36.0 watchdog-4.0.1
In [ ]:
import streamlit as st
import numpy as np
from keras.models import load_model
import matplotlib.pyplot as plt
import io
In [ ]:
# Load your trained VAE model
model = load_model('/content/vae_model.h5')

# Function to encode the keyword into a numerical format
def encode_keyword(keyword, vocab):
    encoded = np.zeros(len(vocab))
    if keyword in vocab:
        encoded[vocab.index(keyword)] = 1
    return encoded

# Example vocabulary
vocab = ['happy', 'sad', 'angry', 'surprised', 'neutral']

# Streamlit app
st.title("Image Generation using VAE")

keyword = st.selectbox('Select a keyword:', vocab)
generate_button = st.button('Generate Image')

if generate_button:
    condition = encode_keyword(keyword, vocab)

    # Generate random noise
    noise = np.random.normal(0, 1, (1, 100))  # Adjust the dimensions according to your model input

    # Combine noise and condition
    input_vector = np.concatenate([noise, condition.reshape(1, -1)], axis=1)
2024-07-22 02:03:25.305 
  Warning: to view this Streamlit app on a browser, run it with the following
  command:

    streamlit run /usr/local/lib/python3.10/dist-packages/colab_kernel_launcher.py [ARGUMENTS]
2024-07-22 02:03:25.316 Session state does not function when running a script without `streamlit run`
In [ ]:
!pip install streamlit numpy keras matplotlib
Requirement already satisfied: streamlit in /usr/local/lib/python3.10/dist-packages (1.36.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (1.25.2)
Requirement already satisfied: keras in /usr/local/lib/python3.10/dist-packages (2.15.0)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (3.7.1)
Requirement already satisfied: altair<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.2.2)
Requirement already satisfied: blinker<2,>=1.0.0 in /usr/lib/python3/dist-packages (from streamlit) (1.4)
Requirement already satisfied: cachetools<6,>=4.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (5.4.0)
Requirement already satisfied: click<9,>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.1.7)
Requirement already satisfied: packaging<25,>=20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (24.1)
Requirement already satisfied: pandas<3,>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.0.3)
Requirement already satisfied: pillow<11,>=7.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (9.4.0)
Requirement already satisfied: protobuf<6,>=3.20 in /usr/local/lib/python3.10/dist-packages (from streamlit) (3.20.3)
Requirement already satisfied: pyarrow>=7.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (14.0.2)
Requirement already satisfied: requests<3,>=2.27 in /usr/local/lib/python3.10/dist-packages (from streamlit) (2.31.0)
Requirement already satisfied: rich<14,>=10.14.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (13.7.1)
Requirement already satisfied: tenacity<9,>=8.1.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (8.5.0)
Requirement already satisfied: toml<2,>=0.10.1 in /usr/local/lib/python3.10/dist-packages (from streamlit) (0.10.2)
Requirement already satisfied: typing-extensions<5,>=4.3.0 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.12.2)
Requirement already satisfied: gitpython!=3.1.19,<4,>=3.0.7 in /usr/local/lib/python3.10/dist-packages (from streamlit) (3.1.43)
Requirement already satisfied: pydeck<1,>=0.8.0b4 in /usr/local/lib/python3.10/dist-packages (from streamlit) (0.9.1)
Requirement already satisfied: tornado<7,>=6.0.3 in /usr/local/lib/python3.10/dist-packages (from streamlit) (6.3.3)
Requirement already satisfied: watchdog<5,>=2.1.5 in /usr/local/lib/python3.10/dist-packages (from streamlit) (4.0.1)
Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.2.1)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (0.12.1)
Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (4.53.1)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.4.5)
Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (3.1.2)
Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (2.8.2)
Requirement already satisfied: entrypoints in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.4)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (3.1.4)
Requirement already satisfied: jsonschema>=3.0 in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (4.19.2)
Requirement already satisfied: toolz in /usr/local/lib/python3.10/dist-packages (from altair<6,>=4.0->streamlit) (0.12.1)
Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.10/dist-packages (from gitpython!=3.1.19,<4,>=3.0.7->streamlit) (4.0.11)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2023.4)
Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3,>=1.3.0->streamlit) (2024.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2.0.7)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->streamlit) (2024.7.4)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich<14,>=10.14.0->streamlit) (2.16.1)
Requirement already satisfied: smmap<6,>=3.0.1 in /usr/local/lib/python3.10/dist-packages (from gitdb<5,>=4.0.1->gitpython!=3.1.19,<4,>=3.0.7->streamlit) (5.0.1)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->altair<6,>=4.0->streamlit) (2.1.5)
Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (23.2.0)
Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (2023.12.1)
Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.35.1)
Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=3.0->altair<6,>=4.0->streamlit) (0.19.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich<14,>=10.14.0->streamlit) (0.1.2)
In [ ]:
# Write the Streamlit app to a file
%%writefile app.py
import streamlit as st
import numpy as np
from keras.models import load_model
import matplotlib.pyplot as plt
import io
Writing app.py
In [ ]:
# Load your trained VAE model
model = load_model('/content/vae_model.h5')
In [ ]:
# Function to encode the keyword into a numerical format
def encode_keyword(keyword, vocab):
    encoded = np.zeros(len(vocab))
    if keyword in vocab:
        encoded[vocab.index(keyword)] = 1
    return encoded

# Example vocabulary
vocab = ['happy', 'sad', 'angry', 'surprised', 'neutral']

# Streamlit app
st.title("Image Generation using VAE")

keyword = st.selectbox('Select a keyword:', vocab)
generate_button = st.button('Generate Image')

if generate_button:
    condition = encode_keyword(keyword, vocab)

    # Generate random noise
    noise = np.random.normal(0, 1, (1, 100))  # Adjust the dimensions according to your model input

    # Combine noise and condition
    input_vector = np.concatenate([noise, condition.reshape(1, -1)], axis=1)

    # Generate the image
    generated_image = model.predict(input_vector)

    # Scale the pixel values to [0, 255]
    generated_image = (generated_image * 127.5 + 127.5).astype(np.uint8)
    generated_image = generated_image[0]  # Remove the batch dimension

    # Display the image
    st.image(generated_image, use_column_width=True)
In [ ]:
!pip install pyngrok
Collecting pyngrok
  Downloading pyngrok-7.2.0-py3-none-any.whl (22 kB)
Requirement already satisfied: PyYAML>=5.1 in /usr/local/lib/python3.10/dist-packages (from pyngrok) (6.0.1)
Installing collected packages: pyngrok
Successfully installed pyngrok-7.2.0
In [ ]:
from pyngrok import ngrok

# Run Streamlit in the background
!streamlit run app.py &

# Set up ngrok to tunnel to the Streamlit app
public_url = ngrok.connect(port='8501')
print(f"Streamlit app is live at: {public_url}")
Collecting usage statistics. To deactivate, set browser.gatherUsageStats to false.


  You can now view your Streamlit app in your browser.

  Local URL: http://localhost:8501
  Network URL: http://172.28.0.12:8501
  External URL: http://34.42.123.165:8501

In [ ]:

In [ ]: